#include <public/xen.h>
#include <asm/hvm/hvm.h>
-#ifdef CONFIG_SMP
static atomic_t waiting_for_crash_ipi;
static int crash_nmi_callback(struct cpu_user_regs *regs, int cpu)
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
/* Would it be better to replace the trap vector here? */
set_nmi_callback(crash_nmi_callback);
- /* Ensure the new callback function is set before sending
- * out the NMI
- */
+ /* Ensure the new callback function is set before sending out the NMI. */
wmb();
smp_send_nmi_allbutself();
/* Leave the nmi callback set */
disable_local_APIC();
}
-#endif
static void crash_save_xen_notes(void)
{
void machine_crash_shutdown(void)
{
- printk("machine_crash_shutdown: %d\n", smp_processor_id());
local_irq_disable();
-#ifdef CONFIG_SMP
nmi_shootdown_cpus();
-#endif
-#ifdef CONFIG_X86_IO_APIC
disable_IO_APIC();
-#endif
+
hvm_disable();
crash_save_xen_notes();
.stop_bits = 1
};
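+ /* Tolerate spurious page faults during early boot: see do_early_page_fault(). */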
+ extern void early_page_fault(void);
+ set_intr_gate(TRAP_page_fault, &early_page_fault);
+
/* Parse the command-line options. */
if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
cmdline = __va(mbi->cmdline);
kdump_start >>= PAGE_SHIFT;
kdump_size >>= PAGE_SHIFT;
- /* allocate pages for Kdump memory area */
+ /* Allocate pages for Kdump memory area. */
k = alloc_boot_pages_at(kdump_size, kdump_start);
-
if ( k != kdump_start )
panic("Unable to reserve Kdump memory\n");
- /* allocate pages for relocated initial images */
+ /* Allocate pages for relocated initial images. */
k = ((initial_images_end - initial_images_start) & ~PAGE_MASK) ? 1 : 0;
k += (initial_images_end - initial_images_start) >> PAGE_SHIFT;
k = alloc_boot_pages(k, 1);
-
- if ( !k )
+ if ( k == 0 )
panic("Unable to allocate initial images memory\n");
move_memory(k << PAGE_SHIFT, initial_images_start, initial_images_end);
return 0;
}
+/*
+ * Early handler to deal with spurious page faults. For example, consider a
+ * routine that uses a mapping immediately after installing it (making it
+ * present). The CPU may speculatively execute the memory access before
+ * executing the PTE write. The instruction will then be marked to cause a
+ * page fault when it is retired, despite the fact that the PTE is present and
+ * correct at that point in time.
+ */
+asmlinkage int do_early_page_fault(struct cpu_user_regs *regs)
+{
+ static int stuck;
+ static unsigned long prev_eip, prev_cr2;
+ unsigned long cr2 = read_cr2();
+
+ BUG_ON(smp_processor_id() != 0);
+
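+ /* A fault at a new (eip, cr2) location: record it and reset the counter. */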
+ if ( (regs->eip != prev_eip) || (cr2 != prev_cr2) )
+ {
+ prev_eip = regs->eip;
+ prev_cr2 = cr2;
+ stuck = 0;
+ return EXCRET_not_a_fault;
+ }
+
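+ /* Same eip/cr2 as last time: after 1000 consecutive repeats, give up. */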
+ if ( stuck++ == 1000 )
+ panic("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n",
+ regs->cs, _p(regs->eip), _p(cr2), regs->error_code);
+
+ return EXCRET_not_a_fault;
+}
+
long do_fpu_taskswitch(int set)
{
struct vcpu *v = current;
idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
set_bit(idx, garbage);
}
-
-paddr_t maddr_from_mapped_domain_page(void *va)
-{
- unsigned long __va = (unsigned long)va;
- l2_pgentry_t *pl2e;
- l1_pgentry_t *pl1e;
- unsigned int idx;
- struct mapcache *cache;
- unsigned long mfn;
-
- if ( (__va >= MAPCACHE_VIRT_START) && (__va < MAPCACHE_VIRT_END) )
- {
- cache = &mapcache_current_vcpu()->domain->arch.mapcache;
- idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
- mfn = l1e_get_pfn(cache->l1tab[idx]);
- }
- else
- {
- ASSERT(__va >= IOREMAP_VIRT_START);
- pl2e = virt_to_xen_l2e(__va);
- pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
- mfn = l1e_get_pfn(*pl1e);
- }
-
- return ((paddr_t)mfn << PAGE_SHIFT) | ((unsigned long)va & ~PAGE_MASK);
-}
pushl $TRAP_spurious_int<<16
jmp handle_exception
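+/*
+ * Early page-fault entry: save the register frame, pass it to
+ * do_early_page_fault(), then resume at the faulting instruction so the
+ * access is retried.
+ */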
+ENTRY(early_page_fault)
+ SAVE_ALL_NOSEGREGS(a)
+ movl %esp,%edx
+ pushl %edx
+ call do_early_page_fault
+ addl $4,%esp
+ jmp restore_all_xen
+
ENTRY(nmi)
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
# NMI entry protocol is incompatible with guest kernel in ring 0.
call do_double_fault
ud2
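+/*
+ * Early page-fault entry: the CPU pushes an error code for #PF, so unlike
+ * the NMI entry below no dummy word is pushed. Hand the saved frame to
+ * do_early_page_fault() and resume at the faulting instruction.
+ */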
+ENTRY(early_page_fault)
+ SAVE_ALL
+ movq %rsp,%rdi
+ call do_early_page_fault
+ jmp restore_all_xen
+
ENTRY(nmi)
pushq $0
SAVE_ALL
void *map_domain_page_global(unsigned long mfn);
void unmap_domain_page_global(void *va);
-/*
- * Convert a VA (within a page previously mapped in the context of the
- * currently-executing VCPU via a call to map_domain_page(), or via a
- * previous call to map_domain_page_global()) to the mapped machine address.
- */
-paddr_t maddr_from_mapped_domain_page(void *va);
-
#define DMCACHE_ENTRY_VALID 1U
#define DMCACHE_ENTRY_HELD 2U
#define map_domain_page_global(mfn) maddr_to_virt((mfn)<<PAGE_SHIFT)
#define unmap_domain_page_global(va) ((void)(va))
-#define maddr_from_mapped_domain_page(va) (virt_to_maddr(va))
-
struct domain_mmap_cache {
};